Instalar librerías necesarias (en modo silencioso -q):
[1]:
!wget -q https://raw.githubusercontent.com/arqlm/arqlm.github.io/main/_static/libraries.txt
!pip install -q -r /content/libraries.txt
!pip install -q --upgrade plotly[kaleido]
!rm -r /content/libraries.txt
!rm -r /content/sample_data/ ## Esta linea no es necesaria cuando no se trabaja en colab.google
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 7.9/7.9 MB 35.8 MB/s eta 0:00:00
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 51.5/51.5 kB 1.7 MB/s eta 0:00:00
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 9.6/9.6 MB 64.7 MB/s eta 0:00:00
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 51.3/51.3 kB 2.9 MB/s eta 0:00:00
[18]:
## Import libraries — stdlib first, then third-party
import base64
import datetime
import io

import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import plotly.io as pio
import plotly.offline as py

# pio.renderers.default = "colab"   # use this line in Google Colab
pio.renderers.default = "notebook"  # use this line in Jupyter Notebook or VS Code
3. Búsqueda de controles geológicos (continuación, por Alteración)
3.1. Carga de archivos
[19]:
import pandas as pd
import numpy as np
import warnings
warnings.simplefilter(action="ignore")
# Load the drillhole database (latin1: file contains accented Spanish headers, e.g. "Elevación")
DH = pd.read_csv('EvYacData.csv', sep=',', encoding='latin1')
# Non-positive Cu grades are sentinel/invalid values (e.g. -99) -> mark as missing.
# BUGFIX: the original chained indexing DH['cu_pct'][mask] = np.nan raises
# SettingWithCopyWarning and becomes a silent no-op under pandas Copy-on-Write;
# .loc assigns on the frame itself.
DH.loc[DH['cu_pct'] <= 0, 'cu_pct'] = np.nan
DH
[19]:
| Este | Norte | Elevación | au_ppm | ag_ppm | cu_pct | aucn_ppm | cucn_ppm | Zmin | Alte | Lito | |
|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 472186.686 | 6925804.447 | 4220.763 | -99.00 | -99.0 | NaN | -99.0 | -99.0 | OXI | ILL_CLO | IBX_MM |
| 1 | 472187.202 | 6925805.493 | 4213.861 | 0.30 | 2.3 | 0.011 | -99.0 | -99.0 | OXI | ILL_CLO | IBX_MM |
| 2 | 472187.343 | 6925805.770 | 4211.986 | 0.47 | 16.2 | 0.032 | -99.0 | -99.0 | OXI | ILL_CLO | IBX_MM |
| 3 | 472187.493 | 6925806.060 | 4210.013 | 0.31 | 2.3 | 0.018 | -99.0 | -99.0 | OXI | ILL_CLO | IBX_MM |
| 4 | 472187.642 | 6925806.351 | 4208.040 | 0.29 | 2.1 | 0.010 | -99.0 | -99.0 | OXI | ILL_CLO | IBX_MM |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 77836 | 471293.198 | 6925823.101 | 3761.674 | 0.03 | 1.0 | 0.002 | -99.0 | -99.0 | BACK | PROP | ECS |
| 77837 | 471293.540 | 6925824.040 | 3759.942 | 0.01 | 0.7 | 0.001 | -99.0 | -99.0 | BACK | PROP | ECS |
| 77838 | 471293.882 | 6925824.980 | 3758.209 | 0.01 | 0.4 | 0.001 | -99.0 | -99.0 | BACK | PROP | ECS |
| 77839 | 471294.224 | 6925825.920 | 3756.477 | 0.06 | 1.3 | 0.004 | -99.0 | -99.0 | BACK | PROP | ECS |
| 77840 | 471294.607 | 6925826.972 | 3754.538 | 0.06 | 0.5 | 0.003 | -99.0 | -99.0 | BACK | PROP | ECS |
77841 rows × 11 columns
3.2. Distribución estadística por Alteración
Calculamos los gráficos de probabilidad de la ley de cobre para los diferentes tipos de alteración (Figura 2). La descripción de los tipos de alteración se presenta en la Tabla 2.
[20]:
## 3D view of drillhole samples coloured by alteration type.
DH["Alte"] = DH["Alte"].astype(str)  # force string dtype so legend categories sort/display consistently

alteration_order = sorted(DH.groupby("Alte").groups.keys())
fig = px.scatter_3d(
    DH,
    x="Este",
    y="Norte",
    z="Elevación",
    color="Alte",
    category_orders={"Alte": alteration_order},
)
fig.update_traces(marker=dict(size=2.0))

## Nicer legend styling
legend_style = dict(
    itemsizing='constant',
    font=dict(size=14),
    bgcolor='rgba(255,255,255,0.8)',
    bordercolor='black',
    borderwidth=1,
)
fig.update_layout(
    hovermode=False,
    legend_title_text='Alteración',
    legend=legend_style,
)
fig.show()
[21]:
import numpy as np
import plotly.graph_objects as go
import scipy.stats as stats
from statsmodels.distributions.empirical_distribution import ECDF  # NOTE(review): not used in this cell — confirm later cells need it

## Normal-probability plot of Cu grade per alteration type (log-x => log-normal check):
## a log-normally distributed population plots as a straight line.
fig = go.Figure()
for category in sorted(DH.groupby('Alte').groups.keys()):
    values = DH.groupby('Alte').get_group(category)['cu_pct'].dropna().values
    # Hazen plotting positions: cumulative probability (i - 0.5) / n for sorted data
    values_sorted = np.sort(values)
    n = len(values_sorted)
    cumprob = (np.arange(1, n+1) - 0.5) / n
    # Map probabilities through the inverse standard-normal CDF (normal scores)
    normal_scores = stats.norm.ppf(cumprob)
    fig.add_trace(go.Scatter(
        x=values_sorted,
        y=normal_scores,
        mode='markers',
        marker=dict(size=5, opacity=0.7),
        name=str(category)
    ))
# Custom y-axis: tick positions in normal-score space, labelled as probabilities.
# BUGFIX: first tick was 0.0001 (= 0.01%) but labelled '0.001%'; use 0.00001 so the
# tick value matches its label and mirrors the 99.999% (0.99999) tick / axis range.
y_ticks = stats.norm.ppf([0.00001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.99999])
y_ticklabels = ['0.001%','1%', '5%', '10%', '20%', '30%', '40%', '50%', '60%', '70%', '80%', '90%', '95%', '99%', '99.999%']
fig.update_layout(
    title="Gráfico de probabilidad log-normal por Alteración",
    title_x=0.5,
    xaxis_title='Cu [%]',
    xaxis=dict(
        type='log',
    ),
    yaxis_title='Probabilidad',
    yaxis=dict(
        tickmode='array',
        tickvals=y_ticks,
        ticktext=y_ticklabels,
        range=[stats.norm.ppf(0.00001), stats.norm.ppf(0.99999)]
    ),
    width=800,
    height=600,
    font_family="Times New Roman",
    font_size=14,
    font_color="black",
    plot_bgcolor='white',
    autosize=False,
    showlegend=True
)
fig.update_layout(
    legend_title_text='Alteración',
    legend=dict(
        itemsizing='constant',
        font=dict(size=14),
        bgcolor='rgba(255,255,255,0.8)',
        bordercolor='black',
        borderwidth=1,
    )
)
fig.update_xaxes(gridcolor='rgba(0,0,0,0.1)')
fig.update_yaxes(gridcolor='rgba(0,0,0,0.1)')
fig.update_layout(hovermode=False)
fig.show()
[22]:
## Descriptive statistics of every assay column per alteration type;
## display only the Cu-grade summary.
## (Name kept as `stats_by_mine` for compatibility with any later cells.)
stats_by_mine = DH.groupby("Alte").describe().round(2)
stats_by_mine["cu_pct"]
[22]:
| count | mean | std | min | 25% | 50% | 75% | max | |
|---|---|---|---|---|---|---|---|---|
| Alte | ||||||||
| 0 | 3032.0 | 0.01 | 0.02 | 0.0 | 0.00 | 0.00 | 0.00 | 0.37 |
| ALUVIO | 119.0 | 0.01 | 0.01 | 0.0 | 0.00 | 0.00 | 0.01 | 0.03 |
| BACK | 1088.0 | 0.23 | 0.27 | 0.0 | 0.03 | 0.14 | 0.35 | 2.12 |
| ILL_CLO | 16860.0 | 0.05 | 0.11 | 0.0 | 0.01 | 0.02 | 0.06 | 6.02 |
| POT_BT | 20535.0 | 0.19 | 0.17 | 0.0 | 0.07 | 0.16 | 0.25 | 4.88 |
| POT_KFELD | 13458.0 | 0.31 | 0.30 | 0.0 | 0.16 | 0.27 | 0.39 | 8.30 |
| PROP | 3249.0 | 0.05 | 0.18 | 0.0 | 0.00 | 0.02 | 0.05 | 8.00 |
| QTZ_SER_PY | 9502.0 | 0.10 | 0.13 | 0.0 | 0.02 | 0.04 | 0.14 | 2.55 |
| SER_CLO | 9064.0 | 0.10 | 0.14 | 0.0 | 0.02 | 0.04 | 0.14 | 2.75 |
[23]:
## Alteration types grouped by similar Cu-grade level and distribution shape
## (see the probability plot above). The lowest-grade pair ('0', 'ALUVIO') was
## inspected previously and is omitted here.
GRADE_GROUPS = [
    ['PROP', 'ILL_CLO'],              # two zones with second-lowest grade
    ['SER_CLO', 'QTZ_SER_PY'],        # two zones with second-highest grade
    ['POT_BT', 'POT_KFELD', 'BACK'],  # three zones with highest grade
]

# Clip the colour scale at the 95th percentile of the FULL dataset so all
# figures share one comparable scale and outliers don't wash it out.
color_max = DH['cu_pct'].quantile(0.95)

def plot_group_3d(alte_group):
    """3D scatter of Cu grade for the drillhole samples whose 'Alte' is in alte_group."""
    subset = DH[DH['Alte'].isin(alte_group)]
    fig = px.scatter_3d(
        subset, x='Este', y='Norte', z='Elevación', color='cu_pct',
        color_continuous_scale=px.colors.sequential.Rainbow[1:],
        range_color=[0.0, color_max],
    )
    fig.update_traces(marker=dict(size=2.0))
    fig.update_layout(hovermode=False)
    fig.show()

for group in GRADE_GROUPS:
    plot_group_3d(group)